Loading Libraries¶

In [1]:
!pip install imbalanced-learn
!pip install tensorflow
!pip install scikit-learn
!pip install seaborn
!pip install keras_tuner
!pip install xgboost
Requirement already satisfied: imbalanced-learn in c:\users\aberg\anaconda3\lib\site-packages (0.12.3)
Requirement already satisfied: numpy>=1.17.3 in c:\users\aberg\anaconda3\lib\site-packages (from imbalanced-learn) (1.26.4)
Requirement already satisfied: scipy>=1.5.0 in c:\users\aberg\anaconda3\lib\site-packages (from imbalanced-learn) (1.13.1)
Requirement already satisfied: scikit-learn>=1.0.2 in c:\users\aberg\anaconda3\lib\site-packages (from imbalanced-learn) (1.5.1)
Requirement already satisfied: joblib>=1.1.1 in c:\users\aberg\anaconda3\lib\site-packages (from imbalanced-learn) (1.4.2)
Requirement already satisfied: threadpoolctl>=2.0.0 in c:\users\aberg\anaconda3\lib\site-packages (from imbalanced-learn) (3.5.0)
Requirement already satisfied: tensorflow in c:\users\aberg\anaconda3\lib\site-packages (2.19.0)
Requirement already satisfied: absl-py>=1.0.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (2.3.1)
Requirement already satisfied: astunparse>=1.6.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (1.6.3)
Requirement already satisfied: flatbuffers>=24.3.25 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (25.2.10)
Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (0.6.0)
Requirement already satisfied: google-pasta>=0.1.1 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (0.2.0)
Requirement already satisfied: libclang>=13.0.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (18.1.1)
Requirement already satisfied: opt-einsum>=2.3.2 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (3.4.0)
Requirement already satisfied: packaging in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (24.1)
Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev,>=3.20.3 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (4.25.3)
Requirement already satisfied: requests<3,>=2.21.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (2.32.3)
Requirement already satisfied: setuptools in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (75.1.0)
Requirement already satisfied: six>=1.12.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (1.16.0)
Requirement already satisfied: termcolor>=1.1.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (3.1.0)
Requirement already satisfied: typing-extensions>=3.6.6 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (4.11.0)
Requirement already satisfied: wrapt>=1.11.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (1.14.1)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (1.73.1)
Requirement already satisfied: tensorboard~=2.19.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (2.19.0)
Requirement already satisfied: keras>=3.5.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (3.10.0)
Requirement already satisfied: numpy<2.2.0,>=1.26.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (1.26.4)
Requirement already satisfied: h5py>=3.11.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (3.11.0)
Requirement already satisfied: ml-dtypes<1.0.0,>=0.5.1 in c:\users\aberg\anaconda3\lib\site-packages (from tensorflow) (0.5.1)
Requirement already satisfied: wheel<1.0,>=0.23.0 in c:\users\aberg\anaconda3\lib\site-packages (from astunparse>=1.6.0->tensorflow) (0.44.0)
Requirement already satisfied: rich in c:\users\aberg\anaconda3\lib\site-packages (from keras>=3.5.0->tensorflow) (13.7.1)
Requirement already satisfied: namex in c:\users\aberg\anaconda3\lib\site-packages (from keras>=3.5.0->tensorflow) (0.1.0)
Requirement already satisfied: optree in c:\users\aberg\anaconda3\lib\site-packages (from keras>=3.5.0->tensorflow) (0.16.0)
Requirement already satisfied: charset-normalizer<4,>=2 in c:\users\aberg\anaconda3\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in c:\users\aberg\anaconda3\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (3.7)
Requirement already satisfied: urllib3<3,>=1.21.1 in c:\users\aberg\anaconda3\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\aberg\anaconda3\lib\site-packages (from requests<3,>=2.21.0->tensorflow) (2025.6.15)
Requirement already satisfied: markdown>=2.6.8 in c:\users\aberg\anaconda3\lib\site-packages (from tensorboard~=2.19.0->tensorflow) (3.4.1)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in c:\users\aberg\anaconda3\lib\site-packages (from tensorboard~=2.19.0->tensorflow) (0.7.2)
Requirement already satisfied: werkzeug>=1.0.1 in c:\users\aberg\anaconda3\lib\site-packages (from tensorboard~=2.19.0->tensorflow) (3.0.3)
Requirement already satisfied: MarkupSafe>=2.1.1 in c:\users\aberg\anaconda3\lib\site-packages (from werkzeug>=1.0.1->tensorboard~=2.19.0->tensorflow) (2.1.3)
Requirement already satisfied: markdown-it-py>=2.2.0 in c:\users\aberg\anaconda3\lib\site-packages (from rich->keras>=3.5.0->tensorflow) (2.2.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\users\aberg\anaconda3\lib\site-packages (from rich->keras>=3.5.0->tensorflow) (2.15.1)
Requirement already satisfied: mdurl~=0.1 in c:\users\aberg\anaconda3\lib\site-packages (from markdown-it-py>=2.2.0->rich->keras>=3.5.0->tensorflow) (0.1.0)
Requirement already satisfied: scikit-learn in c:\users\aberg\anaconda3\lib\site-packages (1.5.1)
Requirement already satisfied: numpy>=1.19.5 in c:\users\aberg\anaconda3\lib\site-packages (from scikit-learn) (1.26.4)
Requirement already satisfied: scipy>=1.6.0 in c:\users\aberg\anaconda3\lib\site-packages (from scikit-learn) (1.13.1)
Requirement already satisfied: joblib>=1.2.0 in c:\users\aberg\anaconda3\lib\site-packages (from scikit-learn) (1.4.2)
Requirement already satisfied: threadpoolctl>=3.1.0 in c:\users\aberg\anaconda3\lib\site-packages (from scikit-learn) (3.5.0)
Requirement already satisfied: seaborn in c:\users\aberg\anaconda3\lib\site-packages (0.13.2)
Requirement already satisfied: numpy!=1.24.0,>=1.20 in c:\users\aberg\anaconda3\lib\site-packages (from seaborn) (1.26.4)
Requirement already satisfied: pandas>=1.2 in c:\users\aberg\anaconda3\lib\site-packages (from seaborn) (2.2.2)
Requirement already satisfied: matplotlib!=3.6.1,>=3.4 in c:\users\aberg\anaconda3\lib\site-packages (from seaborn) (3.9.2)
Requirement already satisfied: contourpy>=1.0.1 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.2.0)
Requirement already satisfied: cycler>=0.10 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (0.11.0)
Requirement already satisfied: fonttools>=4.22.0 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (4.51.0)
Requirement already satisfied: kiwisolver>=1.3.1 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.4.4)
Requirement already satisfied: packaging>=20.0 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (24.1)
Requirement already satisfied: pillow>=8 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (10.4.0)
Requirement already satisfied: pyparsing>=2.3.1 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (3.1.2)
Requirement already satisfied: python-dateutil>=2.7 in c:\users\aberg\anaconda3\lib\site-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (2.9.0.post0)
Requirement already satisfied: pytz>=2020.1 in c:\users\aberg\anaconda3\lib\site-packages (from pandas>=1.2->seaborn) (2024.1)
Requirement already satisfied: tzdata>=2022.7 in c:\users\aberg\anaconda3\lib\site-packages (from pandas>=1.2->seaborn) (2023.3)
Requirement already satisfied: six>=1.5 in c:\users\aberg\anaconda3\lib\site-packages (from python-dateutil>=2.7->matplotlib!=3.6.1,>=3.4->seaborn) (1.16.0)
Requirement already satisfied: keras_tuner in c:\users\aberg\anaconda3\lib\site-packages (1.4.7)
Requirement already satisfied: keras in c:\users\aberg\anaconda3\lib\site-packages (from keras_tuner) (3.10.0)
Requirement already satisfied: packaging in c:\users\aberg\anaconda3\lib\site-packages (from keras_tuner) (24.1)
Requirement already satisfied: requests in c:\users\aberg\anaconda3\lib\site-packages (from keras_tuner) (2.32.3)
Requirement already satisfied: kt-legacy in c:\users\aberg\anaconda3\lib\site-packages (from keras_tuner) (1.0.5)
Requirement already satisfied: absl-py in c:\users\aberg\anaconda3\lib\site-packages (from keras->keras_tuner) (2.3.1)
Requirement already satisfied: numpy in c:\users\aberg\anaconda3\lib\site-packages (from keras->keras_tuner) (1.26.4)
Requirement already satisfied: rich in c:\users\aberg\anaconda3\lib\site-packages (from keras->keras_tuner) (13.7.1)
Requirement already satisfied: namex in c:\users\aberg\anaconda3\lib\site-packages (from keras->keras_tuner) (0.1.0)
Requirement already satisfied: h5py in c:\users\aberg\anaconda3\lib\site-packages (from keras->keras_tuner) (3.11.0)
Requirement already satisfied: optree in c:\users\aberg\anaconda3\lib\site-packages (from keras->keras_tuner) (0.16.0)
Requirement already satisfied: ml-dtypes in c:\users\aberg\anaconda3\lib\site-packages (from keras->keras_tuner) (0.5.1)
Requirement already satisfied: charset-normalizer<4,>=2 in c:\users\aberg\anaconda3\lib\site-packages (from requests->keras_tuner) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in c:\users\aberg\anaconda3\lib\site-packages (from requests->keras_tuner) (3.7)
Requirement already satisfied: urllib3<3,>=1.21.1 in c:\users\aberg\anaconda3\lib\site-packages (from requests->keras_tuner) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in c:\users\aberg\anaconda3\lib\site-packages (from requests->keras_tuner) (2025.6.15)
Requirement already satisfied: typing-extensions>=4.6.0 in c:\users\aberg\anaconda3\lib\site-packages (from optree->keras->keras_tuner) (4.11.0)
Requirement already satisfied: markdown-it-py>=2.2.0 in c:\users\aberg\anaconda3\lib\site-packages (from rich->keras->keras_tuner) (2.2.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in c:\users\aberg\anaconda3\lib\site-packages (from rich->keras->keras_tuner) (2.15.1)
Requirement already satisfied: mdurl~=0.1 in c:\users\aberg\anaconda3\lib\site-packages (from markdown-it-py>=2.2.0->rich->keras->keras_tuner) (0.1.0)
Requirement already satisfied: xgboost in c:\users\aberg\anaconda3\lib\site-packages (3.0.2)
Requirement already satisfied: numpy in c:\users\aberg\anaconda3\lib\site-packages (from xgboost) (1.26.4)
Requirement already satisfied: scipy in c:\users\aberg\anaconda3\lib\site-packages (from xgboost) (1.13.1)
In [2]:
import imblearn
import time
import tensorflow as tf
import sklearn
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import keras_tuner
from xgboost import XGBClassifier
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, BatchNormalization, Dropout
from tensorflow.keras import Sequential
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC 
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, RobustScaler
from sklearn.pipeline import Pipeline
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, learning_curve, GridSearchCV, cross_val_score, RandomizedSearchCV
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, precision_score, recall_score,\
    f1_score, roc_curve, auc, roc_auc_score
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.decomposition import PCA
from keras_tuner.tuners import RandomSearch
# from ydatasynthetic import streamlit_app
# from ydata_synthetic.synthesizers.regular import RegularSynthesizer
# from ydata_synthetic.synthesizers import ModelParameters, TrainParameters

Exploratory Data Analysis¶

Loading the data¶

In [3]:
# Load the file into a DataFrame
apple_quality = pd.read_csv("apple_quality.csv")

print(apple_quality.head())
   A_id      Size    Weight  Sweetness  Crunchiness  Juiciness  Ripeness  \
0   0.0 -3.970049 -2.512336   5.346330    -1.012009   1.844900  0.329840   
1   1.0 -1.195217 -2.839257   3.664059     1.588232   0.853286  0.867530   
2   2.0 -0.292024 -1.351282  -1.738429    -0.342616   2.838636 -0.038033   
3   3.0 -0.657196 -2.271627   1.324874    -0.097875   3.637970 -3.413761   
4   4.0  1.364217 -1.296612  -0.384658    -0.553006   3.030874 -1.303849   

        Acidity Quality  
0  -0.491590483    good  
1  -0.722809367    good  
2   2.621636473     bad  
3   0.790723217    good  
4   0.501984036    good  
In [4]:
# Get the dimensions of the dataset
dimensions = apple_quality.shape
print("Number of rows:", dimensions[0])
print("Number of columns:", dimensions[1])
Number of rows: 4001
Number of columns: 9
In [5]:
print(apple_quality.info())
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 4001 entries, 0 to 4000
Data columns (total 9 columns):
 #   Column       Non-Null Count  Dtype  
---  ------       --------------  -----  
 0   A_id         4000 non-null   float64
 1   Size         4000 non-null   float64
 2   Weight       4000 non-null   float64
 3   Sweetness    4000 non-null   float64
 4   Crunchiness  4000 non-null   float64
 5   Juiciness    4000 non-null   float64
 6   Ripeness     4000 non-null   float64
 7   Acidity      4001 non-null   object 
 8   Quality      4000 non-null   object 
dtypes: float64(7), object(2)
memory usage: 281.4+ KB
None

Dropping NaN¶

In [6]:
print(apple_quality.isnull().sum())
A_id           1
Size           1
Weight         1
Sweetness      1
Crunchiness    1
Juiciness      1
Ripeness       1
Acidity        0
Quality        1
dtype: int64
In [7]:
apple_quality_cleaned = apple_quality.dropna()

print(apple_quality_cleaned.isnull().sum())
A_id           0
Size           0
Weight         0
Sweetness      0
Crunchiness    0
Juiciness      0
Ripeness       0
Acidity        0
Quality        0
dtype: int64

Re-labeling¶

In [8]:
# Label encoding: 1 = good, 0 = bad
label_mapping = {"good": 1, "bad": 0}
apple_quality_cleaned = apple_quality_cleaned.copy()
apple_quality_cleaned['Quality'] = apple_quality_cleaned['Quality'].replace(label_mapping)
C:\Users\aberg\AppData\Local\Temp\ipykernel_60408\1254249889.py:4: FutureWarning: Downcasting behavior in `replace` is deprecated and will be removed in a future version. To retain the old behavior, explicitly call `result.infer_objects(copy=False)`. To opt-in to the future behavior, set `pd.set_option('future.no_silent_downcasting', True)`
  apple_quality_cleaned['Quality'] = apple_quality_cleaned['Quality'].replace(label_mapping)
In [9]:
apple_quality_cleaned.head()
Out[9]:
A_id Size Weight Sweetness Crunchiness Juiciness Ripeness Acidity Quality
0 0.0 -3.970049 -2.512336 5.346330 -1.012009 1.844900 0.329840 -0.491590483 1
1 1.0 -1.195217 -2.839257 3.664059 1.588232 0.853286 0.867530 -0.722809367 1
2 2.0 -0.292024 -1.351282 -1.738429 -0.342616 2.838636 -0.038033 2.621636473 0
3 3.0 -0.657196 -2.271627 1.324874 -0.097875 3.637970 -3.413761 0.790723217 1
4 4.0 1.364217 -1.296612 -0.384658 -0.553006 3.030874 -1.303849 0.501984036 1

Fixing data type of a feature¶

In [10]:
# Check column data types
column_data_types = apple_quality_cleaned.dtypes
print(column_data_types)
A_id           float64
Size           float64
Weight         float64
Sweetness      float64
Crunchiness    float64
Juiciness      float64
Ripeness       float64
Acidity         object
Quality          int64
dtype: object
In [11]:
# Change 'Acidity' data type to float64
apple_quality_cleaned['Acidity'] = apple_quality_cleaned['Acidity'].astype('float64')
print(apple_quality_cleaned.info())
<class 'pandas.core.frame.DataFrame'>
Index: 4000 entries, 0 to 3999
Data columns (total 9 columns):
 #   Column       Non-Null Count  Dtype  
---  ------       --------------  -----  
 0   A_id         4000 non-null   float64
 1   Size         4000 non-null   float64
 2   Weight       4000 non-null   float64
 3   Sweetness    4000 non-null   float64
 4   Crunchiness  4000 non-null   float64
 5   Juiciness    4000 non-null   float64
 6   Ripeness     4000 non-null   float64
 7   Acidity      4000 non-null   float64
 8   Quality      4000 non-null   int64  
dtypes: float64(8), int64(1)
memory usage: 312.5 KB
None
In [12]:
apple_quality_cleaned.describe()
Out[12]:
A_id Size Weight Sweetness Crunchiness Juiciness Ripeness Acidity Quality
count 4000.000000 4000.000000 4000.000000 4000.000000 4000.000000 4000.000000 4000.000000 4000.000000 4000.000000
mean 1999.500000 -0.503015 -0.989547 -0.470479 0.985478 0.512118 0.498277 0.076877 0.501000
std 1154.844867 1.928059 1.602507 1.943441 1.402757 1.930286 1.874427 2.110270 0.500062
min 0.000000 -7.151703 -7.149848 -6.894485 -6.055058 -5.961897 -5.864599 -7.010538 0.000000
25% 999.750000 -1.816765 -2.011770 -1.738425 0.062764 -0.801286 -0.771677 -1.377424 0.000000
50% 1999.500000 -0.513703 -0.984736 -0.504758 0.998249 0.534219 0.503445 0.022609 1.000000
75% 2999.250000 0.805526 0.030976 0.801922 1.894234 1.835976 1.766212 1.510493 1.000000
max 3999.000000 6.406367 5.790714 6.374916 7.619852 7.364403 7.237837 7.404736 1.000000

Dropping A_id¶

In [13]:
#The feature A_id adds no value to the model and can be dropped
apple_quality_cleaned.drop("A_id", axis=1, inplace=True)

Data Visualization¶

Distribution Graph¶

In [14]:
for column in apple_quality_cleaned.columns:
    sns.histplot(apple_quality_cleaned[column], bins=10, kde=True)
    plt.title(f'Distribution of {column}')
    plt.xlabel(column)
    plt.ylabel('Frequency')
    plt.show()
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [15]:
sns.countplot(data=apple_quality_cleaned, x='Quality')
plt.title('Countplot for Quality')
plt.xlabel('Quality')
plt.ylabel('Count')
plt.xticks(rotation=45)
plt.show()
No description has been provided for this image

Correlation Heat Map¶

In [16]:
# Correlation Heatmap
corr_matrix = apple_quality_cleaned.corr()
sns.heatmap(corr_matrix, annot=True, cmap='coolwarm')
plt.title('Correlation Heatmap')
plt.show()
No description has been provided for this image

Scatterplot Pair Plot¶

In [17]:
# Pairplot
sns.pairplot(apple_quality_cleaned, hue='Quality')
plt.show()
No description has been provided for this image

Joint Plot of Ripeness Against Other Features¶

In [18]:
for column in apple_quality_cleaned.columns:
    if column == 'Quality' or column == 'Ripeness':
        break
    sns.jointplot(x='Ripeness', y=column, hue='Quality', data=apple_quality_cleaned, palette='tab10', s=9)
    plt.show()
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image

Box Plot¶

In [19]:
# Boxplot
for column in apple_quality_cleaned.columns:
    if column == 'Quality':
        break
    plt.figure(figsize=(10, 2))
    sns.boxplot(x=column, data=apple_quality_cleaned)
    plt.show()
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image

Density Plot¶

In [20]:
for column in apple_quality_cleaned.columns:
    if column == 'Quality':
        break
    sns.kdeplot(data=apple_quality_cleaned, x=column, hue='Quality', fill=True, alpha=0.5)
    plt.title(f'Distribution of {column} by Quality')
    plt.xlabel(column)
    plt.ylabel('Density')
    plt.show()
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
No description has been provided for this image
In [ ]:
 

Outlier Detection With IQR Method and Scaling¶

In [21]:
apple_quality_normal = apple_quality_cleaned.copy()
In [22]:
apple_quality_normal.info()
<class 'pandas.core.frame.DataFrame'>
Index: 4000 entries, 0 to 3999
Data columns (total 8 columns):
 #   Column       Non-Null Count  Dtype  
---  ------       --------------  -----  
 0   Size         4000 non-null   float64
 1   Weight       4000 non-null   float64
 2   Sweetness    4000 non-null   float64
 3   Crunchiness  4000 non-null   float64
 4   Juiciness    4000 non-null   float64
 5   Ripeness     4000 non-null   float64
 6   Acidity      4000 non-null   float64
 7   Quality      4000 non-null   int64  
dtypes: float64(7), int64(1)
memory usage: 281.2 KB
In [23]:
numerical_features = ['Size', 'Weight', 'Sweetness', 'Crunchiness', 'Juiciness', 'Ripeness',
       'Acidity']

robust_scaler = RobustScaler()

apple_quality_normal[numerical_features] = robust_scaler.fit_transform(apple_quality_normal[numerical_features])
In [24]:
def count_outliers(series):
    Q1 = series.quantile(0.25)
    Q3 = series.quantile(0.75)
    IQR = Q3 - Q1
    lower_bound = Q1 - 1.5 * IQR
    upper_bound = Q3 + 1.5 * IQR
    return ((series < lower_bound) | (series > upper_bound)).sum()

for feature in numerical_features :
    num_outliers = count_outliers(apple_quality_normal[feature])
    print(f'Number of outliers in {feature}: {num_outliers}')
Number of outliers in Size: 22
Number of outliers in Weight: 54
Number of outliers in Sweetness: 32
Number of outliers in Crunchiness: 47
Number of outliers in Juiciness: 32
Number of outliers in Ripeness: 24
Number of outliers in Acidity: 20
In [25]:
scaler = StandardScaler()

apple_quality_normal[numerical_features] = scaler.fit_transform(apple_quality_normal[numerical_features])

Model preparation¶

Splitting Data into X and y¶

In [26]:
X_normal = apple_quality_normal.drop("Quality", axis=1)
y_normal = apple_quality_normal["Quality"]
display(X_normal.tail())
display(y_normal.tail())
Size Weight Sweetness Crunchiness Juiciness Ripeness Acidity
3995 0.291729 -0.048594 -1.669449 -0.365345 0.614425 0.931482 0.028866
3996 0.108878 1.834105 0.137124 -1.159058 -0.252634 -0.846326 0.842347
3997 -1.105655 -0.716904 -1.013784 -0.234036 0.874379 2.275957 -0.668950
3998 -1.818112 -0.492908 1.459901 -0.845446 0.854549 -0.151419 -1.093171
3999 0.405409 -0.453071 0.304496 -1.525439 0.390954 -0.680212 0.721761
3995    0
3996    1
3997    0
3998    1
3999    1
Name: Quality, dtype: int64
In [27]:
X = apple_quality_cleaned.drop("Quality", axis=1)
y = apple_quality_cleaned["Quality"]
display(X.tail())
display(y.tail())
Size Weight Sweetness Crunchiness Juiciness Ripeness Acidity
3995 0.059386 -1.067408 -3.714549 0.473052 1.697986 2.244055 0.137784
3996 -0.293118 1.949253 -0.204020 -0.640196 0.024523 -1.087900 1.854235
3997 -2.634515 -2.138247 -2.440461 0.657223 2.199709 4.763859 -1.334611
3998 -4.008004 -1.779337 2.366397 -0.200329 2.161435 0.214488 -2.229720
3999 0.278540 -1.715505 0.121217 -1.154075 1.266677 -0.776571 1.599796
3995    0
3996    1
3997    0
3998    1
3999    1
Name: Quality, dtype: int64

Train Test Split Data¶

In [28]:
#Split data in trainset and testset
X_train_normal, X_test_normal, y_train_normal, y_test_normal = train_test_split(X_normal,\
                                                                    y_normal, test_size=0.3, random_state=42)
In [29]:
#Split data in trainset and testset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

Custom Function¶

In [30]:
#Function that automates the search for the best hyperparameters
def gridsearch_optim(param_grid, model, X, y, cv):
    # Create GridSearchCV instance
    grid_search = GridSearchCV(estimator=model, param_grid=param_grid, cv=cv, scoring='f1', verbose=True)

    # Fit the grid search to the training data
    start_time = time.time()
    grid_search.fit(X, y)

    # Get the best parameters and best score
    best_params = grid_search.best_params_
    best_score = grid_search.best_score_

    print("Best Parameters:", best_params)
    print("Best Score:", best_score)
    print(f"Run Time: {time.time() - start_time} Seconds")
    
    return best_params
In [31]:
def test_model(y_test, y_pred):
    # Evaluate the model's performance
    accuracy = accuracy_score(y_test, y_pred)
    print("\nAccuracy:", accuracy)

    # Additional evaluation metrics
    print("\nClassification Report:")
    print(classification_report(y_test, y_pred))

    print("\nConfusion Matrix:")
    print(confusion_matrix(y_test, y_pred))

Model Training¶

Random Forest¶

A quick analysis of the feature importance calculated from the Random Forest model suggests that all 7 features have a somewhat critical contribution to the model, therefore feature extraction or feature selection might be counterproductive in this case. We should validate this finding with a feature reduction method like PCA

With Normalized Data¶

In [32]:
#From Kaggle Notebook**
RF_normal_param_grid = {
    'n_estimators': [50, 100, 200, 300],
    'max_depth': [None, 10, 20, 30],
    'min_samples_split': [2, 5, 10],
    'min_samples_leaf': [1, 2, 4],
    'bootstrap': [True, False],
    'criterion': ['gini', 'entropy'],
}

RF_normal = RandomForestClassifier()

randomized_search_RF = RandomizedSearchCV(
    RF_normal,
    param_distributions=RF_normal_param_grid,
    n_iter=10,
    cv=5,
    scoring='accuracy',
    random_state=42,
    n_jobs=-1
)

randomized_search_RF.fit(X_train_normal, y_train_normal)

best_params_RF_normal = randomized_search_RF.best_params_
print(f"Best Hyperparameters for Random Forest: {best_params_RF_normal}")

best_RF_model = randomized_search_RF.best_estimator_

y_pred_RF_normal = best_RF_model.predict(X_test_normal)

test_model(y_test_normal, y_pred_RF_normal)
Best Hyperparameters for Random Forest: {'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 2, 'max_depth': 20, 'criterion': 'entropy', 'bootstrap': True}

Accuracy: 0.895

Classification Report:
              precision    recall  f1-score   support

           0       0.89      0.90      0.89       593
           1       0.90      0.89      0.90       607

    accuracy                           0.90      1200
   macro avg       0.89      0.90      0.89      1200
weighted avg       0.90      0.90      0.90      1200


Confusion Matrix:
[[531  62]
 [ 64 543]]

Without Normalized Data¶

In [33]:
# Retrain RF model with best parameters
RF_model = RandomForestClassifier(**best_params_RF_normal)
RF_model.fit(X_train, y_train)

RF_predictions = RF_model.predict(X_test)

test_model(y_test, RF_predictions)
Accuracy: 0.895

Classification Report:
              precision    recall  f1-score   support

           0       0.90      0.89      0.89       593
           1       0.89      0.90      0.90       607

    accuracy                           0.90      1200
   macro avg       0.90      0.89      0.89      1200
weighted avg       0.90      0.90      0.89      1200


Confusion Matrix:
[[526  67]
 [ 59 548]]

SVM¶

With Normalized Data¶

In [35]:
#From Kaggle Notebook**
SVM_param_grid = {
    'C': [0.1, 1, 10, 100],
    'kernel': ['linear', 'rbf', 'poly'],
    'gamma': ['scale', 'auto', 0.1, 1],
}

svc_model = SVC()

randomized_search = RandomizedSearchCV(svc_model, param_distributions=SVM_param_grid, n_iter=10, cv=5, scoring='accuracy',\
                                       random_state=42, n_jobs=-1)

randomized_search.fit(X_train_normal, y_train_normal)

svm_best_params = randomized_search.best_params_
print(f"Best Hyperparameters: {svm_best_params}")

best_svc_model = randomized_search.best_estimator_
svc_predicted = best_svc_model.predict(X_test_normal)

test_model(y_test_normal, svc_predicted)
Best Hyperparameters: {'kernel': 'rbf', 'gamma': 'auto', 'C': 100}

Accuracy: 0.9116666666666666

Classification Report:
              precision    recall  f1-score   support

           0       0.90      0.93      0.91       593
           1       0.93      0.90      0.91       607

    accuracy                           0.91      1200
   macro avg       0.91      0.91      0.91      1200
weighted avg       0.91      0.91      0.91      1200


Confusion Matrix:
[[549  44]
 [ 62 545]]
In [36]:
fpr, tpr, thresholds = roc_curve(y_test, svc_predicted)

roc_auc = auc(fpr, tpr)

plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (AUC = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='Random')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc='lower right')
plt.show()
No description has been provided for this image

Not Normalized Data¶

In [37]:
#Retrain SVM model with best parameters
SVM_model = SVC(**svm_best_params)
SVM_model.fit(X_train, y_train)

y_pred_SVM = SVM_model.predict(X_test)

# Evaluate model performance
test_model(y_test, y_pred_SVM)
Accuracy: 0.8883333333333333

Classification Report:
              precision    recall  f1-score   support

           0       0.88      0.90      0.89       593
           1       0.90      0.88      0.89       607

    accuracy                           0.89      1200
   macro avg       0.89      0.89      0.89      1200
weighted avg       0.89      0.89      0.89      1200


Confusion Matrix:
[[533  60]
 [ 74 533]]
In [38]:
fpr, tpr, thresholds = roc_curve(y_test, y_pred_SVM)

roc_auc = auc(fpr, tpr)

plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (AUC = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='Random')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc='lower right')
plt.show()
No description has been provided for this image

AdaBoost¶

In [39]:
Ada_param_grid = {
    'n_estimators': [50, 100, 150, 200, 250],
    'learning_rate': [0.01, 0.05, 0.1, 0.5, 1.0, 1.5]
}

adaBoost_model = AdaBoostClassifier()

ada_best_params = gridsearch_optim(Ada_param_grid, adaBoost_model, X_train, y_train, cv=5)
Fitting 5 folds for each of 30 candidates, totalling 150 fits
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
Best Parameters: {'learning_rate': 0.1, 'n_estimators': 250}
Best Score: 0.7733065488020161
Run Time: 44.131510972976685 Seconds
In [40]:
#Retrain SVM model with best parameters
AdaBoost_Model = AdaBoostClassifier(**ada_best_params)
AdaBoost_Model.fit(X_train, y_train)

y_pred_ada = AdaBoost_Model.predict(X_test)

# Evaluate model performance
test_model(y_test, y_pred_ada)
C:\Users\aberg\anaconda3\Lib\site-packages\sklearn\ensemble\_weight_boosting.py:527: FutureWarning: The SAMME.R algorithm (the default) is deprecated and will be removed in 1.6. Use the SAMME algorithm to circumvent this warning.
  warnings.warn(
Accuracy: 0.7883333333333333

Classification Report:
              precision    recall  f1-score   support

           0       0.78      0.79      0.79       593
           1       0.79      0.79      0.79       607

    accuracy                           0.79      1200
   macro avg       0.79      0.79      0.79      1200
weighted avg       0.79      0.79      0.79      1200


Confusion Matrix:
[[468 125]
 [129 478]]

K-NN¶

In [41]:
# Define a grid of hyperparameters to search
knn_param_grid = {
    'n_neighbors': [5, 10, 15, 20],
    'weights': ['uniform', 'distance'],
    'algorithm': ['auto', 'kd_tree', 'ball_tree'],
    'leaf_size': [15, 20, 25],
    'metric': ['minkowski', 'euclidean', 'manhattan']
}

# Instantiate the KNN classifier
knn = KNeighborsClassifier()

# Instantiate GridSearchCV
knn_best_params = gridsearch_optim(knn_param_grid, knn, X_train, y_train, cv=2)
Fitting 2 folds for each of 216 candidates, totalling 432 fits
Best Parameters: {'algorithm': 'auto', 'leaf_size': 15, 'metric': 'minkowski', 'n_neighbors': 20, 'weights': 'distance'}
Best Score: 0.8783984349688198
Run Time: 10.245506525039673 Seconds
In [42]:
#Retrain KNN model with best parameters
knn = KNeighborsClassifier(**knn_best_params)
knn.fit(X_train, y_train)

y_pred_knn = knn.predict(X_test)

# Evaluate model performance
test_report_knn = classification_report(y_test, y_pred_knn)

print("Test Classification Report:")
print(test_report_knn)
Test Classification Report:
              precision    recall  f1-score   support

           0       0.90      0.91      0.90       593
           1       0.91      0.90      0.91       607

    accuracy                           0.91      1200
   macro avg       0.91      0.91      0.90      1200
weighted avg       0.91      0.91      0.91      1200

Neural Network¶

In [43]:
# Define the function to build the model
def create_model(hp):
    model = Sequential()
    model.add(Dense(units=hp.Int('units1', 128, 512, step=64), activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(hp.Float('dropout_rate', 0.3, 0.7, step=0.1)))
    model.add(Dense(units=hp.Int('units2', 64, 256, step=64), activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(hp.Float('dropout_rate', 0.3, 0.7, step=0.1)))
    model.add(Dense(units=hp.Int('units3', 32, 128, step=32), activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(hp.Float('dropout_rate', 0.3, 0.7, step=0.1)))
    model.add(Dense(1, activation='sigmoid'))
    
    optimizer = Adam(learning_rate=hp.Choice('learning_rate', [0.001, 0.01, 0.1]))
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return model


# Perform random search
tuner = RandomSearch(
    create_model,
    objective='val_accuracy',
    max_trials=3, # 3 seems to be better
    executions_per_trial=3,
    directory='my_dir',
    project_name='my_project')

tuner.search(X_train, y_train, epochs=100, validation_split=0.2)

# Get the best hyperparameters
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
best_model = tuner.hypermodel.build(best_hps)

# Train the model with the best hyperparameters
best_model.fit(X_train, y_train, epochs=100, validation_split=0.2)

# Evaluate the model on the test set
test_loss, test_accuracy = best_model.evaluate(X_test, y_test)
print("Test Accuracy:", test_accuracy)



# 300: was around 94
# 150: 0.9512500166893005
# 50: 0.9512500166893005
# 200: Test Accuracy: 0.9524999856948853
# 100: 0.9537500143051147
Reloading Tuner from my_dir\my_project\tuner0.json
Epoch 1/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.6849 - loss: 0.8229 - val_accuracy: 0.8518 - val_loss: 0.4892
Epoch 2/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.7979 - loss: 0.4689 - val_accuracy: 0.8482 - val_loss: 0.3376
Epoch 3/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.7977 - loss: 0.4280 - val_accuracy: 0.8589 - val_loss: 0.3621
Epoch 4/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8194 - loss: 0.4288 - val_accuracy: 0.8500 - val_loss: 0.3586
Epoch 5/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8177 - loss: 0.4046 - val_accuracy: 0.8554 - val_loss: 0.3555
Epoch 6/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8066 - loss: 0.4683 - val_accuracy: 0.8732 - val_loss: 0.3206
Epoch 7/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8271 - loss: 0.4020 - val_accuracy: 0.7679 - val_loss: 0.5920
Epoch 8/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8193 - loss: 0.4171 - val_accuracy: 0.8643 - val_loss: 0.3120
Epoch 9/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8089 - loss: 0.4184 - val_accuracy: 0.8500 - val_loss: 0.3409
Epoch 10/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8310 - loss: 0.4001 - val_accuracy: 0.8643 - val_loss: 0.3036
Epoch 11/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8296 - loss: 0.3835 - val_accuracy: 0.8679 - val_loss: 0.3429
Epoch 12/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8488 - loss: 0.3648 - val_accuracy: 0.8804 - val_loss: 0.2530
Epoch 13/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8523 - loss: 0.3618 - val_accuracy: 0.8893 - val_loss: 0.2564
Epoch 14/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8376 - loss: 0.3755 - val_accuracy: 0.8679 - val_loss: 0.2876
Epoch 15/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8307 - loss: 0.4010 - val_accuracy: 0.9107 - val_loss: 0.2064
Epoch 16/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8746 - loss: 0.3121 - val_accuracy: 0.8071 - val_loss: 0.3656
Epoch 17/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8475 - loss: 0.3446 - val_accuracy: 0.9143 - val_loss: 0.2081
Epoch 18/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8432 - loss: 0.3767 - val_accuracy: 0.8875 - val_loss: 0.2188
Epoch 19/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8678 - loss: 0.3260 - val_accuracy: 0.8625 - val_loss: 0.3127
Epoch 20/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8408 - loss: 0.3750 - val_accuracy: 0.9107 - val_loss: 0.2569
Epoch 21/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8698 - loss: 0.3317 - val_accuracy: 0.8982 - val_loss: 0.2232
Epoch 22/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8595 - loss: 0.3536 - val_accuracy: 0.9196 - val_loss: 0.1959
Epoch 23/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8757 - loss: 0.3337 - val_accuracy: 0.9143 - val_loss: 0.2097
Epoch 24/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9029 - loss: 0.2738 - val_accuracy: 0.8661 - val_loss: 0.3472
Epoch 25/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8692 - loss: 0.3267 - val_accuracy: 0.9000 - val_loss: 0.3599
Epoch 26/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8633 - loss: 0.3911 - val_accuracy: 0.9214 - val_loss: 0.1956
Epoch 27/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8617 - loss: 0.3452 - val_accuracy: 0.9071 - val_loss: 0.2258
Epoch 28/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8945 - loss: 0.2689 - val_accuracy: 0.9018 - val_loss: 0.2160
Epoch 29/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8857 - loss: 0.3109 - val_accuracy: 0.9321 - val_loss: 0.1847
Epoch 30/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8848 - loss: 0.2907 - val_accuracy: 0.8679 - val_loss: 0.3689
Epoch 31/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8698 - loss: 0.3587 - val_accuracy: 0.9071 - val_loss: 0.2382
Epoch 32/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8858 - loss: 0.3159 - val_accuracy: 0.9250 - val_loss: 0.2210
Epoch 33/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8911 - loss: 0.2932 - val_accuracy: 0.8929 - val_loss: 0.3005
Epoch 34/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8598 - loss: 0.3453 - val_accuracy: 0.9054 - val_loss: 0.2100
Epoch 35/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9049 - loss: 0.2657 - val_accuracy: 0.9125 - val_loss: 0.2156
Epoch 36/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8869 - loss: 0.2937 - val_accuracy: 0.9411 - val_loss: 0.1868
Epoch 37/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8805 - loss: 0.3193 - val_accuracy: 0.9357 - val_loss: 0.1832
Epoch 38/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8959 - loss: 0.2560 - val_accuracy: 0.9143 - val_loss: 0.1926
Epoch 39/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8884 - loss: 0.2961 - val_accuracy: 0.9214 - val_loss: 0.2128
Epoch 40/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8811 - loss: 0.3487 - val_accuracy: 0.9232 - val_loss: 0.2159
Epoch 41/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9123 - loss: 0.2736 - val_accuracy: 0.9250 - val_loss: 0.1950
Epoch 42/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8985 - loss: 0.2561 - val_accuracy: 0.9286 - val_loss: 0.2368
Epoch 43/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8930 - loss: 0.2893 - val_accuracy: 0.9179 - val_loss: 0.2342
Epoch 44/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9074 - loss: 0.2770 - val_accuracy: 0.9304 - val_loss: 0.1714
Epoch 45/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8846 - loss: 0.3006 - val_accuracy: 0.9393 - val_loss: 0.1795
Epoch 46/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8971 - loss: 0.2824 - val_accuracy: 0.8839 - val_loss: 0.3169
Epoch 47/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8985 - loss: 0.2626 - val_accuracy: 0.9321 - val_loss: 0.1797
Epoch 48/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8959 - loss: 0.2664 - val_accuracy: 0.8696 - val_loss: 0.2938
Epoch 49/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8787 - loss: 0.3003 - val_accuracy: 0.9250 - val_loss: 0.1817
Epoch 50/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9150 - loss: 0.2572 - val_accuracy: 0.9286 - val_loss: 0.2273
Epoch 51/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8798 - loss: 0.3195 - val_accuracy: 0.9250 - val_loss: 0.1909
Epoch 52/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8946 - loss: 0.2720 - val_accuracy: 0.9196 - val_loss: 0.2466
Epoch 53/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9073 - loss: 0.2562 - val_accuracy: 0.9179 - val_loss: 0.2362
Epoch 54/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9061 - loss: 0.2520 - val_accuracy: 0.9000 - val_loss: 0.2560
Epoch 55/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8892 - loss: 0.2758 - val_accuracy: 0.9196 - val_loss: 0.1907
Epoch 56/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9076 - loss: 0.2376 - val_accuracy: 0.9393 - val_loss: 0.1750
Epoch 57/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8896 - loss: 0.3111 - val_accuracy: 0.8964 - val_loss: 0.2783
Epoch 58/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8884 - loss: 0.3089 - val_accuracy: 0.8982 - val_loss: 0.2343
Epoch 59/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8923 - loss: 0.2931 - val_accuracy: 0.9429 - val_loss: 0.1790
Epoch 60/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8913 - loss: 0.3001 - val_accuracy: 0.9232 - val_loss: 0.1874
Epoch 61/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9042 - loss: 0.2542 - val_accuracy: 0.9304 - val_loss: 0.1800
Epoch 62/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9030 - loss: 0.2636 - val_accuracy: 0.9179 - val_loss: 0.1899
Epoch 63/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9098 - loss: 0.2376 - val_accuracy: 0.9161 - val_loss: 0.2318
Epoch 64/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8973 - loss: 0.2845 - val_accuracy: 0.8875 - val_loss: 0.3422
Epoch 65/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8706 - loss: 0.4136 - val_accuracy: 0.9196 - val_loss: 0.2163
Epoch 66/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9032 - loss: 0.2575 - val_accuracy: 0.9179 - val_loss: 0.2031
Epoch 67/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8848 - loss: 0.3214 - val_accuracy: 0.9089 - val_loss: 0.2441
Epoch 68/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9131 - loss: 0.2355 - val_accuracy: 0.8714 - val_loss: 0.4161
Epoch 69/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8728 - loss: 0.3377 - val_accuracy: 0.9214 - val_loss: 0.2333
Epoch 70/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9139 - loss: 0.2329 - val_accuracy: 0.9446 - val_loss: 0.1795
Epoch 71/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9249 - loss: 0.2217 - val_accuracy: 0.9143 - val_loss: 0.2121
Epoch 72/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9085 - loss: 0.2596 - val_accuracy: 0.9214 - val_loss: 0.2597
Epoch 73/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8983 - loss: 0.2911 - val_accuracy: 0.9375 - val_loss: 0.1794
Epoch 74/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9084 - loss: 0.2660 - val_accuracy: 0.9375 - val_loss: 0.1739
Epoch 75/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9074 - loss: 0.2488 - val_accuracy: 0.9036 - val_loss: 0.2559
Epoch 76/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8976 - loss: 0.2582 - val_accuracy: 0.9214 - val_loss: 0.2711
Epoch 77/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9218 - loss: 0.2229 - val_accuracy: 0.9375 - val_loss: 0.1776
Epoch 78/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9066 - loss: 0.2797 - val_accuracy: 0.9000 - val_loss: 0.2342
Epoch 79/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8853 - loss: 0.3171 - val_accuracy: 0.8946 - val_loss: 0.3087
Epoch 80/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8993 - loss: 0.2967 - val_accuracy: 0.9375 - val_loss: 0.1969
Epoch 81/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8855 - loss: 0.3538 - val_accuracy: 0.9321 - val_loss: 0.1822
Epoch 82/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8990 - loss: 0.2551 - val_accuracy: 0.9161 - val_loss: 0.3071
Epoch 83/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8930 - loss: 0.3097 - val_accuracy: 0.9107 - val_loss: 0.2842
Epoch 84/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8959 - loss: 0.3211 - val_accuracy: 0.9107 - val_loss: 0.9317
Epoch 85/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8852 - loss: 0.3327 - val_accuracy: 0.9214 - val_loss: 0.1706
Epoch 86/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8980 - loss: 0.2568 - val_accuracy: 0.9375 - val_loss: 0.1649
Epoch 87/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9054 - loss: 0.2513 - val_accuracy: 0.9304 - val_loss: 0.1920
Epoch 88/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9127 - loss: 0.2436 - val_accuracy: 0.9286 - val_loss: 0.1723
Epoch 89/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9109 - loss: 0.2479 - val_accuracy: 0.9286 - val_loss: 0.1807
Epoch 90/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9058 - loss: 0.2294 - val_accuracy: 0.9357 - val_loss: 0.1687
Epoch 91/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9174 - loss: 0.2327 - val_accuracy: 0.9214 - val_loss: 0.2089
Epoch 92/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8947 - loss: 0.2633 - val_accuracy: 0.9339 - val_loss: 0.1851
Epoch 93/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8892 - loss: 0.2895 - val_accuracy: 0.9411 - val_loss: 0.1824
Epoch 94/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9213 - loss: 0.2203 - val_accuracy: 0.9321 - val_loss: 0.1731
Epoch 95/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9117 - loss: 0.2413 - val_accuracy: 0.9357 - val_loss: 0.1649
Epoch 96/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9232 - loss: 0.2144 - val_accuracy: 0.9196 - val_loss: 0.1807
Epoch 97/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9148 - loss: 0.2307 - val_accuracy: 0.9357 - val_loss: 0.1713
Epoch 98/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9065 - loss: 0.2567 - val_accuracy: 0.9321 - val_loss: 0.1744
Epoch 99/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9201 - loss: 0.2432 - val_accuracy: 0.9357 - val_loss: 0.1869
Epoch 100/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9113 - loss: 0.2301 - val_accuracy: 0.9214 - val_loss: 0.2107
38/38 ━━━━━━━━━━━━━━━━━━━━ 0s 764us/step - accuracy: 0.9281 - loss: 0.2335
Test Accuracy: 0.9266666769981384
In [44]:
#Assuming y_true contains the true labels and y_score contains the predicted probabilities
#y_true should be a binary vector of true labels (0 or 1)
#y_score should be a vector of predicted probabilities for the positive class
#Calculate ROC AUC
y_pred_nn = best_model.predict(X_test)

roc_auc = roc_auc_score(y_test, y_pred_nn)

print("ROC AUC:", roc_auc)
38/38 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step 
ROC AUC: 0.9809529630421918
In [45]:
fpr, tpr, thresholds = roc_curve(y_test, y_pred_nn)

roc_auc = auc(fpr, tpr)

plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (AUC = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='Random')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc='lower right')
plt.show()
No description has been provided for this image
In [46]:
# Define the neural network model
model = Sequential([
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(32, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')
])

# Compile the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model
model.fit(X_train, y_train, epochs=200, batch_size=16, verbose=1)

# Evaluate the model on the testing data
y_pred = model.predict(X_test)

#test_model(y_test, y_pred)
Epoch 1/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 1s 863us/step - accuracy: 0.5402 - loss: 0.7388
Epoch 2/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 768us/step - accuracy: 0.6555 - loss: 0.6131
Epoch 3/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 662us/step - accuracy: 0.7504 - loss: 0.5252
Epoch 4/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 672us/step - accuracy: 0.7776 - loss: 0.4645
Epoch 5/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 725us/step - accuracy: 0.7969 - loss: 0.4411
Epoch 6/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 771us/step - accuracy: 0.8089 - loss: 0.4165
Epoch 7/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 746us/step - accuracy: 0.8351 - loss: 0.3820
Epoch 8/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 766us/step - accuracy: 0.8491 - loss: 0.3877
Epoch 9/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 756us/step - accuracy: 0.8418 - loss: 0.3751
Epoch 10/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 750us/step - accuracy: 0.8364 - loss: 0.3636
Epoch 11/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.8343 - loss: 0.3742
Epoch 12/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 690us/step - accuracy: 0.8530 - loss: 0.3548
Epoch 13/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 673us/step - accuracy: 0.8485 - loss: 0.3309
Epoch 14/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.8580 - loss: 0.3371
Epoch 15/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 703us/step - accuracy: 0.8572 - loss: 0.3331
Epoch 16/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 678us/step - accuracy: 0.8665 - loss: 0.3229
Epoch 17/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 664us/step - accuracy: 0.8653 - loss: 0.3312
Epoch 18/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 673us/step - accuracy: 0.8727 - loss: 0.3070
Epoch 19/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.8541 - loss: 0.3403
Epoch 20/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 678us/step - accuracy: 0.8623 - loss: 0.3110
Epoch 21/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 712us/step - accuracy: 0.8691 - loss: 0.3051
Epoch 22/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.8675 - loss: 0.3038
Epoch 23/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 725us/step - accuracy: 0.8841 - loss: 0.2803
Epoch 24/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 689us/step - accuracy: 0.8814 - loss: 0.3040
Epoch 25/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 691us/step - accuracy: 0.8724 - loss: 0.3047
Epoch 26/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.8809 - loss: 0.3050
Epoch 27/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.8702 - loss: 0.3016
Epoch 28/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.8753 - loss: 0.3124
Epoch 29/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 713us/step - accuracy: 0.8851 - loss: 0.2723
Epoch 30/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 766us/step - accuracy: 0.8832 - loss: 0.2805
Epoch 31/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 729us/step - accuracy: 0.8871 - loss: 0.2834
Epoch 32/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 711us/step - accuracy: 0.8734 - loss: 0.2964
Epoch 33/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 727us/step - accuracy: 0.8886 - loss: 0.2678
Epoch 34/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 725us/step - accuracy: 0.8891 - loss: 0.2720
Epoch 35/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 745us/step - accuracy: 0.8829 - loss: 0.2875
Epoch 36/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 763us/step - accuracy: 0.8840 - loss: 0.2788
Epoch 37/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 740us/step - accuracy: 0.8903 - loss: 0.2813
Epoch 38/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 734us/step - accuracy: 0.8876 - loss: 0.2616
Epoch 39/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 757us/step - accuracy: 0.8941 - loss: 0.2668
Epoch 40/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.8914 - loss: 0.2613
Epoch 41/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 732us/step - accuracy: 0.8896 - loss: 0.2681
Epoch 42/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9005 - loss: 0.2439
Epoch 43/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 705us/step - accuracy: 0.9048 - loss: 0.2450
Epoch 44/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 738us/step - accuracy: 0.8948 - loss: 0.2479
Epoch 45/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 693us/step - accuracy: 0.9027 - loss: 0.2503
Epoch 46/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 678us/step - accuracy: 0.9130 - loss: 0.2359
Epoch 47/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 666us/step - accuracy: 0.9060 - loss: 0.2509
Epoch 48/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 686us/step - accuracy: 0.9029 - loss: 0.2554
Epoch 49/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.9106 - loss: 0.2145
Epoch 50/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 691us/step - accuracy: 0.9092 - loss: 0.2271
Epoch 51/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 701us/step - accuracy: 0.9065 - loss: 0.2459
Epoch 52/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9152 - loss: 0.2306
Epoch 53/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 720us/step - accuracy: 0.9092 - loss: 0.2220
Epoch 54/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 705us/step - accuracy: 0.9127 - loss: 0.2243
Epoch 55/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 706us/step - accuracy: 0.9105 - loss: 0.2341
Epoch 56/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.9263 - loss: 0.1881
Epoch 57/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.9218 - loss: 0.2157
Epoch 58/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 719us/step - accuracy: 0.9220 - loss: 0.2071
Epoch 59/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 715us/step - accuracy: 0.9105 - loss: 0.2249
Epoch 60/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 703us/step - accuracy: 0.9238 - loss: 0.2139
Epoch 61/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 705us/step - accuracy: 0.9242 - loss: 0.2110
Epoch 62/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9366 - loss: 0.2007
Epoch 63/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 811us/step - accuracy: 0.9179 - loss: 0.2338
Epoch 64/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9256 - loss: 0.2036
Epoch 65/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 718us/step - accuracy: 0.9355 - loss: 0.1872
Epoch 66/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.9144 - loss: 0.2272
Epoch 67/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9103 - loss: 0.2186
Epoch 68/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 719us/step - accuracy: 0.9252 - loss: 0.2259
Epoch 69/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 735us/step - accuracy: 0.9268 - loss: 0.1994
Epoch 70/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 764us/step - accuracy: 0.9210 - loss: 0.2070
Epoch 71/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9191 - loss: 0.2066
Epoch 72/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9339 - loss: 0.1883
Epoch 73/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 737us/step - accuracy: 0.9385 - loss: 0.1894
Epoch 74/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 698us/step - accuracy: 0.9339 - loss: 0.2013
Epoch 75/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 684us/step - accuracy: 0.9331 - loss: 0.2016
Epoch 76/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 676us/step - accuracy: 0.9252 - loss: 0.2119
Epoch 77/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 696us/step - accuracy: 0.9301 - loss: 0.1981
Epoch 78/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.9344 - loss: 0.1998
Epoch 79/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 732us/step - accuracy: 0.9286 - loss: 0.2137
Epoch 80/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 729us/step - accuracy: 0.9245 - loss: 0.1929
Epoch 81/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 714us/step - accuracy: 0.9336 - loss: 0.1812
Epoch 82/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 738us/step - accuracy: 0.9335 - loss: 0.1920
Epoch 83/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9256 - loss: 0.2018
Epoch 84/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 697us/step - accuracy: 0.9371 - loss: 0.1932
Epoch 85/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 695us/step - accuracy: 0.9310 - loss: 0.2251
Epoch 86/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 693us/step - accuracy: 0.9416 - loss: 0.1583
Epoch 87/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 754us/step - accuracy: 0.9334 - loss: 0.1753
Epoch 88/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 732us/step - accuracy: 0.9312 - loss: 0.1836
Epoch 89/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 694us/step - accuracy: 0.9340 - loss: 0.1893
Epoch 90/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 684us/step - accuracy: 0.9455 - loss: 0.1767
Epoch 91/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9241 - loss: 0.1996
Epoch 92/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 680us/step - accuracy: 0.9376 - loss: 0.1680
Epoch 93/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 687us/step - accuracy: 0.9422 - loss: 0.1613
Epoch 94/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 701us/step - accuracy: 0.9331 - loss: 0.1816
Epoch 95/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.9374 - loss: 0.1733
Epoch 96/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 718us/step - accuracy: 0.9437 - loss: 0.1569
Epoch 97/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 693us/step - accuracy: 0.9361 - loss: 0.1746
Epoch 98/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 699us/step - accuracy: 0.9405 - loss: 0.1658
Epoch 99/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 698us/step - accuracy: 0.9453 - loss: 0.1648
Epoch 100/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 704us/step - accuracy: 0.9315 - loss: 0.2025
Epoch 101/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9434 - loss: 0.1616
Epoch 102/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.9394 - loss: 0.1757
Epoch 103/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 664us/step - accuracy: 0.9467 - loss: 0.1664
Epoch 104/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 656us/step - accuracy: 0.9337 - loss: 0.1707
Epoch 105/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 738us/step - accuracy: 0.9445 - loss: 0.1522
Epoch 106/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 720us/step - accuracy: 0.9409 - loss: 0.1674
Epoch 107/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 720us/step - accuracy: 0.9487 - loss: 0.1435
Epoch 108/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 713us/step - accuracy: 0.9396 - loss: 0.1866
Epoch 109/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 689us/step - accuracy: 0.9390 - loss: 0.1707
Epoch 110/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 718us/step - accuracy: 0.9347 - loss: 0.1794
Epoch 111/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 705us/step - accuracy: 0.9319 - loss: 0.1771
Epoch 112/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 712us/step - accuracy: 0.9524 - loss: 0.1419
Epoch 113/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.9467 - loss: 0.1852
Epoch 114/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 734us/step - accuracy: 0.9355 - loss: 0.1589
Epoch 115/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 691us/step - accuracy: 0.9488 - loss: 0.1448
Epoch 116/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.9461 - loss: 0.1401
Epoch 117/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 688us/step - accuracy: 0.9366 - loss: 0.1649
Epoch 118/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 694us/step - accuracy: 0.9499 - loss: 0.1630
Epoch 119/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 653us/step - accuracy: 0.9415 - loss: 0.1634
Epoch 120/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 681us/step - accuracy: 0.9382 - loss: 0.1653
Epoch 121/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 687us/step - accuracy: 0.9473 - loss: 0.1467
Epoch 122/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 713us/step - accuracy: 0.9477 - loss: 0.1586
Epoch 123/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 736us/step - accuracy: 0.9491 - loss: 0.1486
Epoch 124/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 734us/step - accuracy: 0.9394 - loss: 0.1710
Epoch 125/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 690us/step - accuracy: 0.9425 - loss: 0.1671
Epoch 126/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 684us/step - accuracy: 0.9396 - loss: 0.1522
Epoch 127/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 696us/step - accuracy: 0.9472 - loss: 0.1445
Epoch 128/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9518 - loss: 0.1503
Epoch 129/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 715us/step - accuracy: 0.9507 - loss: 0.1389
Epoch 130/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9488 - loss: 0.1660
Epoch 131/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 712us/step - accuracy: 0.9420 - loss: 0.1649
Epoch 132/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 681us/step - accuracy: 0.9482 - loss: 0.1452
Epoch 133/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 698us/step - accuracy: 0.9490 - loss: 0.1566
Epoch 134/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 689us/step - accuracy: 0.9412 - loss: 0.1581
Epoch 135/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 746us/step - accuracy: 0.9474 - loss: 0.1503
Epoch 136/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 728us/step - accuracy: 0.9374 - loss: 0.1630
Epoch 137/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.9515 - loss: 0.1395
Epoch 138/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 706us/step - accuracy: 0.9537 - loss: 0.1430
Epoch 139/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 718us/step - accuracy: 0.9300 - loss: 0.1787
Epoch 140/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 779us/step - accuracy: 0.9482 - loss: 0.1610
Epoch 141/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 736us/step - accuracy: 0.9515 - loss: 0.1485
Epoch 142/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 723us/step - accuracy: 0.9557 - loss: 0.1518
Epoch 143/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 719us/step - accuracy: 0.9481 - loss: 0.1571
Epoch 144/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 778us/step - accuracy: 0.9426 - loss: 0.1560
Epoch 145/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 747us/step - accuracy: 0.9475 - loss: 0.1499
Epoch 146/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9378 - loss: 0.1621
Epoch 147/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9524 - loss: 0.1574
Epoch 148/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 764us/step - accuracy: 0.9520 - loss: 0.1488
Epoch 149/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 831us/step - accuracy: 0.9421 - loss: 0.1771
Epoch 150/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 800us/step - accuracy: 0.9545 - loss: 0.1333
Epoch 151/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 762us/step - accuracy: 0.9542 - loss: 0.1397
Epoch 152/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 734us/step - accuracy: 0.9502 - loss: 0.1453
Epoch 153/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 763us/step - accuracy: 0.9569 - loss: 0.1450
Epoch 154/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 718us/step - accuracy: 0.9441 - loss: 0.1754
Epoch 155/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 708us/step - accuracy: 0.9485 - loss: 0.1611
Epoch 156/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.9555 - loss: 0.1431
Epoch 157/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.9485 - loss: 0.1405
Epoch 158/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 720us/step - accuracy: 0.9437 - loss: 0.1530
Epoch 159/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 695us/step - accuracy: 0.9442 - loss: 0.1535
Epoch 160/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 696us/step - accuracy: 0.9456 - loss: 0.1462
Epoch 161/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 679us/step - accuracy: 0.9387 - loss: 0.1605
Epoch 162/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 683us/step - accuracy: 0.9543 - loss: 0.1378
Epoch 163/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 705us/step - accuracy: 0.9466 - loss: 0.1586
Epoch 164/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 736us/step - accuracy: 0.9504 - loss: 0.1455
Epoch 165/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 760us/step - accuracy: 0.9501 - loss: 0.1369
Epoch 166/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 756us/step - accuracy: 0.9475 - loss: 0.1577
Epoch 167/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 713us/step - accuracy: 0.9505 - loss: 0.1341
Epoch 168/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.9501 - loss: 0.1486
Epoch 169/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 722us/step - accuracy: 0.9459 - loss: 0.1546
Epoch 170/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 727us/step - accuracy: 0.9453 - loss: 0.1524
Epoch 171/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.9411 - loss: 0.1653
Epoch 172/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 702us/step - accuracy: 0.9463 - loss: 0.1365
Epoch 173/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.9553 - loss: 0.1384
Epoch 174/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 723us/step - accuracy: 0.9343 - loss: 0.1841
Epoch 175/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 719us/step - accuracy: 0.9453 - loss: 0.1577
Epoch 176/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 722us/step - accuracy: 0.9529 - loss: 0.1417
Epoch 177/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 697us/step - accuracy: 0.9451 - loss: 0.1593
Epoch 178/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 711us/step - accuracy: 0.9534 - loss: 0.1269
Epoch 179/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 722us/step - accuracy: 0.9396 - loss: 0.1640
Epoch 180/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.9457 - loss: 0.1501
Epoch 181/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 704us/step - accuracy: 0.9572 - loss: 0.1380
Epoch 182/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 711us/step - accuracy: 0.9443 - loss: 0.1489
Epoch 183/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 742us/step - accuracy: 0.9498 - loss: 0.1615
Epoch 184/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 720us/step - accuracy: 0.9488 - loss: 0.1441
Epoch 185/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.9419 - loss: 0.1522
Epoch 186/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 694us/step - accuracy: 0.9496 - loss: 0.1451
Epoch 187/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 725us/step - accuracy: 0.9435 - loss: 0.1443
Epoch 188/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 678us/step - accuracy: 0.9445 - loss: 0.1420
Epoch 189/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 689us/step - accuracy: 0.9527 - loss: 0.1306
Epoch 190/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 697us/step - accuracy: 0.9496 - loss: 0.1412
Epoch 191/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9412 - loss: 0.1429
Epoch 192/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 729us/step - accuracy: 0.9501 - loss: 0.1328
Epoch 193/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 717us/step - accuracy: 0.9537 - loss: 0.1341
Epoch 194/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 693us/step - accuracy: 0.9483 - loss: 0.1525
Epoch 195/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 720us/step - accuracy: 0.9537 - loss: 0.1391
Epoch 196/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 685us/step - accuracy: 0.9430 - loss: 0.1580
Epoch 197/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.9460 - loss: 0.1654
Epoch 198/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 696us/step - accuracy: 0.9426 - loss: 0.1523
Epoch 199/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 718us/step - accuracy: 0.9462 - loss: 0.1443
Epoch 200/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 915us/step - accuracy: 0.9519 - loss: 0.1712
38/38 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step 
In [47]:
#Assuming y_true contains the true labels and y_score contains the predicted probabilities
#y_true should be a binary vector of true labels (0 or 1)
#y_score should be a vector of predicted probabilities for the positive class
#Calculate ROC AUC
#y_pred = best_model.predict(X_test)

roc_auc = roc_auc_score(y_test, y_pred)

print("ROC AUC:", roc_auc)
ROC AUC: 0.9834699723017855
In [48]:
# Define the neural network model
model_normal = Sequential([
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(32, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')
])

# Compile the model
model_normal.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Train the model
model_normal.fit(X_train_normal, y_train_normal, epochs=200, batch_size=16, verbose=1)

# Evaluate the model on the testing data
y_pred_normal = model.predict(X_test_normal)

#test_model(y_test, y_pred)
Epoch 1/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 1s 834us/step - accuracy: 0.5120 - loss: 0.7196
Epoch 2/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 771us/step - accuracy: 0.6650 - loss: 0.6215
Epoch 3/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 791us/step - accuracy: 0.7469 - loss: 0.5045
Epoch 4/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 796us/step - accuracy: 0.8131 - loss: 0.4158
Epoch 5/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 725us/step - accuracy: 0.8137 - loss: 0.4055
Epoch 6/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 744us/step - accuracy: 0.8446 - loss: 0.3881
Epoch 7/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 723us/step - accuracy: 0.8462 - loss: 0.3629
Epoch 8/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 792us/step - accuracy: 0.8586 - loss: 0.3467
Epoch 9/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 692us/step - accuracy: 0.8522 - loss: 0.3551
Epoch 10/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 679us/step - accuracy: 0.8583 - loss: 0.3313
Epoch 11/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 693us/step - accuracy: 0.8584 - loss: 0.3397
Epoch 12/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 730us/step - accuracy: 0.8584 - loss: 0.3308
Epoch 13/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 728us/step - accuracy: 0.8529 - loss: 0.3400
Epoch 14/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 702us/step - accuracy: 0.8633 - loss: 0.3228
Epoch 15/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.8681 - loss: 0.3231
Epoch 16/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 701us/step - accuracy: 0.8594 - loss: 0.3447
Epoch 17/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 699us/step - accuracy: 0.8819 - loss: 0.2811
Epoch 18/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.8747 - loss: 0.3008
Epoch 19/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.8759 - loss: 0.2894
Epoch 20/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 753us/step - accuracy: 0.8805 - loss: 0.2931
Epoch 21/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 726us/step - accuracy: 0.8807 - loss: 0.2914
Epoch 22/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 708us/step - accuracy: 0.8767 - loss: 0.2809
Epoch 23/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 698us/step - accuracy: 0.8811 - loss: 0.2931
Epoch 24/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 671us/step - accuracy: 0.8760 - loss: 0.2835
Epoch 25/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 676us/step - accuracy: 0.8790 - loss: 0.2842
Epoch 26/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 717us/step - accuracy: 0.8799 - loss: 0.2900
Epoch 27/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 694us/step - accuracy: 0.8910 - loss: 0.2508
Epoch 28/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 729us/step - accuracy: 0.8819 - loss: 0.2784
Epoch 29/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 712us/step - accuracy: 0.8889 - loss: 0.2784
Epoch 30/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 714us/step - accuracy: 0.8873 - loss: 0.2687
Epoch 31/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9044 - loss: 0.2397
Epoch 32/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 692us/step - accuracy: 0.9084 - loss: 0.2404
Epoch 33/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 717us/step - accuracy: 0.8921 - loss: 0.2616
Epoch 34/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 736us/step - accuracy: 0.8994 - loss: 0.2460
Epoch 35/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 718us/step - accuracy: 0.9068 - loss: 0.2514
Epoch 36/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 699us/step - accuracy: 0.9008 - loss: 0.2446
Epoch 37/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 747us/step - accuracy: 0.9123 - loss: 0.2317
Epoch 38/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9078 - loss: 0.2364
Epoch 39/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 665us/step - accuracy: 0.9145 - loss: 0.2124
Epoch 40/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 668us/step - accuracy: 0.9149 - loss: 0.2291
Epoch 41/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 680us/step - accuracy: 0.9222 - loss: 0.2101
Epoch 42/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9119 - loss: 0.2212
Epoch 43/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.9139 - loss: 0.2278
Epoch 44/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.9225 - loss: 0.2245
Epoch 45/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 685us/step - accuracy: 0.9110 - loss: 0.2367
Epoch 46/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 746us/step - accuracy: 0.9160 - loss: 0.2144
Epoch 47/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 698us/step - accuracy: 0.9169 - loss: 0.2099
Epoch 48/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 703us/step - accuracy: 0.9178 - loss: 0.2150
Epoch 49/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 693us/step - accuracy: 0.9176 - loss: 0.2309
Epoch 50/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 684us/step - accuracy: 0.9131 - loss: 0.2223
Epoch 51/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 763us/step - accuracy: 0.9267 - loss: 0.1918
Epoch 52/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 769us/step - accuracy: 0.9312 - loss: 0.1806
Epoch 53/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 689us/step - accuracy: 0.9174 - loss: 0.2028
Epoch 54/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.9320 - loss: 0.1953
Epoch 55/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 793us/step - accuracy: 0.9228 - loss: 0.2085
Epoch 56/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 734us/step - accuracy: 0.9457 - loss: 0.1631
Epoch 57/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 750us/step - accuracy: 0.9175 - loss: 0.2130
Epoch 58/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 730us/step - accuracy: 0.9240 - loss: 0.2052
Epoch 59/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 703us/step - accuracy: 0.9301 - loss: 0.2027
Epoch 60/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 739us/step - accuracy: 0.9303 - loss: 0.1877
Epoch 61/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9234 - loss: 0.1907
Epoch 62/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 678us/step - accuracy: 0.9355 - loss: 0.1868
Epoch 63/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 719us/step - accuracy: 0.9373 - loss: 0.1955
Epoch 64/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 719us/step - accuracy: 0.9358 - loss: 0.1912
Epoch 65/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 706us/step - accuracy: 0.9260 - loss: 0.1828
Epoch 66/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 691us/step - accuracy: 0.9377 - loss: 0.1849
Epoch 67/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 681us/step - accuracy: 0.9308 - loss: 0.1797
Epoch 68/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 675us/step - accuracy: 0.9341 - loss: 0.1890
Epoch 69/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 653us/step - accuracy: 0.9322 - loss: 0.1896
Epoch 70/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 706us/step - accuracy: 0.9253 - loss: 0.1893
Epoch 71/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 679us/step - accuracy: 0.9370 - loss: 0.1637
Epoch 72/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 735us/step - accuracy: 0.9305 - loss: 0.1964
Epoch 73/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 732us/step - accuracy: 0.9391 - loss: 0.1678
Epoch 74/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.9341 - loss: 0.1756
Epoch 75/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 704us/step - accuracy: 0.9343 - loss: 0.1818
Epoch 76/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 747us/step - accuracy: 0.9331 - loss: 0.1829
Epoch 77/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 737us/step - accuracy: 0.9405 - loss: 0.1529
Epoch 78/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 704us/step - accuracy: 0.9335 - loss: 0.1696
Epoch 79/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 722us/step - accuracy: 0.9422 - loss: 0.1806
Epoch 80/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 790us/step - accuracy: 0.9462 - loss: 0.1530
Epoch 81/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 815us/step - accuracy: 0.9346 - loss: 0.1795
Epoch 82/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 750us/step - accuracy: 0.9406 - loss: 0.1797
Epoch 83/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 696us/step - accuracy: 0.9363 - loss: 0.1953
Epoch 84/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 712us/step - accuracy: 0.9452 - loss: 0.1577
Epoch 85/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 729us/step - accuracy: 0.9405 - loss: 0.1610
Epoch 86/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 706us/step - accuracy: 0.9427 - loss: 0.1739
Epoch 87/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 699us/step - accuracy: 0.9304 - loss: 0.1869
Epoch 88/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 717us/step - accuracy: 0.9360 - loss: 0.1757
Epoch 89/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 733us/step - accuracy: 0.9417 - loss: 0.1572
Epoch 90/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 715us/step - accuracy: 0.9354 - loss: 0.1911
Epoch 91/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 684us/step - accuracy: 0.9406 - loss: 0.1665
Epoch 92/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 675us/step - accuracy: 0.9290 - loss: 0.2147
Epoch 93/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 687us/step - accuracy: 0.9488 - loss: 0.1467
Epoch 94/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 683us/step - accuracy: 0.9348 - loss: 0.1863
Epoch 95/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 681us/step - accuracy: 0.9430 - loss: 0.1675
Epoch 96/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 649us/step - accuracy: 0.9361 - loss: 0.1611
Epoch 97/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 679us/step - accuracy: 0.9486 - loss: 0.1631
Epoch 98/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 738us/step - accuracy: 0.9388 - loss: 0.1785
Epoch 99/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 764us/step - accuracy: 0.9527 - loss: 0.1452
Epoch 100/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.9434 - loss: 0.1686
Epoch 101/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 687us/step - accuracy: 0.9420 - loss: 0.1529
Epoch 102/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 682us/step - accuracy: 0.9447 - loss: 0.1616
Epoch 103/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 701us/step - accuracy: 0.9351 - loss: 0.1873
Epoch 104/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.9455 - loss: 0.1501
Epoch 105/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 724us/step - accuracy: 0.9415 - loss: 0.1534
Epoch 106/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 710us/step - accuracy: 0.9461 - loss: 0.1491
Epoch 107/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 731us/step - accuracy: 0.9504 - loss: 0.1573
Epoch 108/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9375 - loss: 0.1787
Epoch 109/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 706us/step - accuracy: 0.9493 - loss: 0.1433
Epoch 110/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 694us/step - accuracy: 0.9443 - loss: 0.1634
Epoch 111/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 681us/step - accuracy: 0.9438 - loss: 0.1633
Epoch 112/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 681us/step - accuracy: 0.9491 - loss: 0.1483
Epoch 113/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 701us/step - accuracy: 0.9388 - loss: 0.1595
Epoch 114/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 699us/step - accuracy: 0.9499 - loss: 0.1563
Epoch 115/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9477 - loss: 0.1499
Epoch 116/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 767us/step - accuracy: 0.9425 - loss: 0.1740
Epoch 117/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9466 - loss: 0.1660
Epoch 118/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 720us/step - accuracy: 0.9396 - loss: 0.1809
Epoch 119/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 692us/step - accuracy: 0.9474 - loss: 0.1516
Epoch 120/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 691us/step - accuracy: 0.9394 - loss: 0.1820
Epoch 121/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 685us/step - accuracy: 0.9482 - loss: 0.1641
Epoch 122/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 712us/step - accuracy: 0.9473 - loss: 0.1529
Epoch 123/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 721us/step - accuracy: 0.9426 - loss: 0.1516
Epoch 124/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 699us/step - accuracy: 0.9477 - loss: 0.1537
Epoch 125/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 695us/step - accuracy: 0.9445 - loss: 0.1480
Epoch 126/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 688us/step - accuracy: 0.9500 - loss: 0.1460
Epoch 127/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9452 - loss: 0.1557
Epoch 128/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 751us/step - accuracy: 0.9375 - loss: 0.1789
Epoch 129/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 701us/step - accuracy: 0.9477 - loss: 0.1383
Epoch 130/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.9440 - loss: 0.1433
Epoch 131/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.9444 - loss: 0.1697
Epoch 132/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 708us/step - accuracy: 0.9414 - loss: 0.1634
Epoch 133/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 766us/step - accuracy: 0.9436 - loss: 0.1468
Epoch 134/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 772us/step - accuracy: 0.9444 - loss: 0.1578
Epoch 135/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 699us/step - accuracy: 0.9466 - loss: 0.1520
Epoch 136/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 704us/step - accuracy: 0.9378 - loss: 0.1910
Epoch 137/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 688us/step - accuracy: 0.9479 - loss: 0.1532
Epoch 138/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 686us/step - accuracy: 0.9535 - loss: 0.1405
Epoch 139/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 658us/step - accuracy: 0.9541 - loss: 0.1626
Epoch 140/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 658us/step - accuracy: 0.9334 - loss: 0.1949
Epoch 141/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 685us/step - accuracy: 0.9535 - loss: 0.1533
Epoch 142/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 752us/step - accuracy: 0.9476 - loss: 0.1775
Epoch 143/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 749us/step - accuracy: 0.9465 - loss: 0.1496
Epoch 144/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9527 - loss: 0.1541
Epoch 145/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 700us/step - accuracy: 0.9553 - loss: 0.1439
Epoch 146/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 684us/step - accuracy: 0.9534 - loss: 0.1304
Epoch 147/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 677us/step - accuracy: 0.9375 - loss: 0.1807
Epoch 148/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 690us/step - accuracy: 0.9552 - loss: 0.1406
Epoch 149/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 709us/step - accuracy: 0.9538 - loss: 0.1269
Epoch 150/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 693us/step - accuracy: 0.9517 - loss: 0.1474
Epoch 151/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 745us/step - accuracy: 0.9509 - loss: 0.1424
Epoch 152/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 708us/step - accuracy: 0.9571 - loss: 0.1464
Epoch 153/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 687us/step - accuracy: 0.9376 - loss: 0.1679
Epoch 154/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 714us/step - accuracy: 0.9443 - loss: 0.1439
Epoch 155/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 667us/step - accuracy: 0.9456 - loss: 0.1445
Epoch 156/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 679us/step - accuracy: 0.9453 - loss: 0.1701
Epoch 157/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 678us/step - accuracy: 0.9493 - loss: 0.1489
Epoch 158/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 695us/step - accuracy: 0.9458 - loss: 0.1658
Epoch 159/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 740us/step - accuracy: 0.9558 - loss: 0.1281
Epoch 160/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 742us/step - accuracy: 0.9526 - loss: 0.1428
Epoch 161/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9549 - loss: 0.1281
Epoch 162/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 695us/step - accuracy: 0.9458 - loss: 0.1440
Epoch 163/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 705us/step - accuracy: 0.9444 - loss: 0.1864
Epoch 164/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 724us/step - accuracy: 0.9472 - loss: 0.1517
Epoch 165/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 725us/step - accuracy: 0.9514 - loss: 0.1418
Epoch 166/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 735us/step - accuracy: 0.9432 - loss: 0.1707
Epoch 167/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 688us/step - accuracy: 0.9595 - loss: 0.1225
Epoch 168/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 755us/step - accuracy: 0.9427 - loss: 0.1614
Epoch 169/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 781us/step - accuracy: 0.9493 - loss: 0.1405
Epoch 170/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 739us/step - accuracy: 0.9467 - loss: 0.1507
Epoch 171/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 737us/step - accuracy: 0.9557 - loss: 0.1346
Epoch 172/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 732us/step - accuracy: 0.9470 - loss: 0.1588
Epoch 173/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.9489 - loss: 0.1569
Epoch 174/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 747us/step - accuracy: 0.9453 - loss: 0.1360
Epoch 175/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 747us/step - accuracy: 0.9443 - loss: 0.1492
Epoch 176/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 686us/step - accuracy: 0.9509 - loss: 0.1507
Epoch 177/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 727us/step - accuracy: 0.9500 - loss: 0.1450
Epoch 178/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 726us/step - accuracy: 0.9460 - loss: 0.1504
Epoch 179/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 717us/step - accuracy: 0.9518 - loss: 0.1391
Epoch 180/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 707us/step - accuracy: 0.9500 - loss: 0.1410
Epoch 181/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 660us/step - accuracy: 0.9461 - loss: 0.1397
Epoch 182/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 662us/step - accuracy: 0.9486 - loss: 0.1566
Epoch 183/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 661us/step - accuracy: 0.9531 - loss: 0.1474
Epoch 184/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 765us/step - accuracy: 0.9506 - loss: 0.1359
Epoch 185/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 745us/step - accuracy: 0.9455 - loss: 0.1567
Epoch 186/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 826us/step - accuracy: 0.9598 - loss: 0.1285
Epoch 187/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 762us/step - accuracy: 0.9609 - loss: 0.1357
Epoch 188/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 780us/step - accuracy: 0.9598 - loss: 0.1306
Epoch 189/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 741us/step - accuracy: 0.9377 - loss: 0.1483
Epoch 190/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 735us/step - accuracy: 0.9591 - loss: 0.1348
Epoch 191/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 747us/step - accuracy: 0.9423 - loss: 0.1612
Epoch 192/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 737us/step - accuracy: 0.9532 - loss: 0.1443
Epoch 193/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 736us/step - accuracy: 0.9545 - loss: 0.1443
Epoch 194/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 768us/step - accuracy: 0.9496 - loss: 0.1442
Epoch 195/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 705us/step - accuracy: 0.9519 - loss: 0.1390
Epoch 196/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 716us/step - accuracy: 0.9531 - loss: 0.1193
Epoch 197/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 684us/step - accuracy: 0.9499 - loss: 0.1556
Epoch 198/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 726us/step - accuracy: 0.9636 - loss: 0.1209
Epoch 199/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 715us/step - accuracy: 0.9490 - loss: 0.1604
Epoch 200/200
175/175 ━━━━━━━━━━━━━━━━━━━━ 0s 695us/step - accuracy: 0.9522 - loss: 0.1197
38/38 ━━━━━━━━━━━━━━━━━━━━ 0s 654us/step
In [49]:
#Assuming y_true contains the true labels and y_score contains the predicted probabilities
#y_true should be a binary vector of true labels (0 or 1)
#y_score should be a vector of predicted probabilities for the positive class
#Calculate ROC AUC
#y_pred = best_model.predict(X_test)

roc_auc_normal = roc_auc_score(y_test_normal, y_pred_normal)

print("ROC AUC:", roc_auc_normal)
ROC AUC: 0.751848168222897
In [50]:
# Define the function to build the model
def create_model(hp):
    model = Sequential()
    model.add(Dense(units=hp.Int('units1', 128, 512, step=64), activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(hp.Float('dropout_rate', 0.3, 0.7, step=0.1)))
    model.add(Dense(units=hp.Int('units2', 64, 256, step=64), activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(hp.Float('dropout_rate', 0.3, 0.7, step=0.1)))
    model.add(Dense(units=hp.Int('units3', 32, 128, step=32), activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(hp.Float('dropout_rate', 0.3, 0.7, step=0.1)))
    model.add(Dense(1, activation='sigmoid'))
    
    optimizer = Adam(learning_rate=hp.Choice('learning_rate', [0.001, 0.01, 0.1]))
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    return model


# Perform random search
tuner = RandomSearch(
    create_model,
    objective='val_accuracy',
    max_trials=3, # 3 seems to be better
    executions_per_trial=3,
    directory='my_dir',
    project_name='my_project')

tuner.search(X_train_normal, y_train_normal, epochs=100, validation_split=0.2)

# Get the best hyperparameters
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
best_model = tuner.hypermodel.build(best_hps)

# Train the model with the best hyperparameters
best_model.fit(X_train_normal, y_train_normal, epochs=100, validation_split=0.2)

# Evaluate the model on the test set
test_loss, test_accuracy = best_model.evaluate(X_test_normal, y_test_normal)
print("Test Accuracy:", test_accuracy)



# 300: was around 94
# 150: 0.9512500166893005
# 50: 0.9512500166893005
# 200: Test Accuracy: 0.9524999856948853
# 100: 0.9537500143051147
Reloading Tuner from my_dir\my_project\tuner0.json
Epoch 1/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.7040 - loss: 0.7532 - val_accuracy: 0.8161 - val_loss: 0.6246
Epoch 2/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8114 - loss: 0.4243 - val_accuracy: 0.8161 - val_loss: 0.4139
Epoch 3/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8190 - loss: 0.4239 - val_accuracy: 0.8464 - val_loss: 0.3462
Epoch 4/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8089 - loss: 0.4157 - val_accuracy: 0.8446 - val_loss: 0.4254
Epoch 5/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8165 - loss: 0.4058 - val_accuracy: 0.8446 - val_loss: 0.3216
Epoch 6/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8245 - loss: 0.4060 - val_accuracy: 0.8661 - val_loss: 0.2838
Epoch 7/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8070 - loss: 0.4321 - val_accuracy: 0.8321 - val_loss: 0.4171
Epoch 8/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8410 - loss: 0.3845 - val_accuracy: 0.8446 - val_loss: 0.3192
Epoch 9/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8459 - loss: 0.3860 - val_accuracy: 0.8196 - val_loss: 0.4072
Epoch 10/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8160 - loss: 0.4701 - val_accuracy: 0.7571 - val_loss: 0.4905
Epoch 11/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8264 - loss: 0.3999 - val_accuracy: 0.8500 - val_loss: 0.3838
Epoch 12/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8564 - loss: 0.3481 - val_accuracy: 0.8625 - val_loss: 0.4213
Epoch 13/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8349 - loss: 0.3739 - val_accuracy: 0.8821 - val_loss: 0.2618
Epoch 14/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8472 - loss: 0.3550 - val_accuracy: 0.8393 - val_loss: 0.3494
Epoch 15/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8608 - loss: 0.3267 - val_accuracy: 0.8893 - val_loss: 0.2482
Epoch 16/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8687 - loss: 0.3161 - val_accuracy: 0.8911 - val_loss: 0.3291
Epoch 17/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8339 - loss: 0.4325 - val_accuracy: 0.8839 - val_loss: 0.2720
Epoch 18/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8454 - loss: 0.3437 - val_accuracy: 0.8964 - val_loss: 0.2269
Epoch 19/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8592 - loss: 0.3492 - val_accuracy: 0.9143 - val_loss: 0.2103
Epoch 20/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8606 - loss: 0.3740 - val_accuracy: 0.9018 - val_loss: 0.2586
Epoch 21/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8582 - loss: 0.3649 - val_accuracy: 0.9071 - val_loss: 0.2552
Epoch 22/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8673 - loss: 0.3482 - val_accuracy: 0.8857 - val_loss: 0.3012
Epoch 23/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8628 - loss: 0.3431 - val_accuracy: 0.8839 - val_loss: 0.2722
Epoch 24/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8569 - loss: 0.3377 - val_accuracy: 0.8964 - val_loss: 0.2719
Epoch 25/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8643 - loss: 0.3403 - val_accuracy: 0.9161 - val_loss: 0.2117
Epoch 26/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8557 - loss: 0.3976 - val_accuracy: 0.9125 - val_loss: 0.2177
Epoch 27/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8898 - loss: 0.2768 - val_accuracy: 0.9054 - val_loss: 0.2772
Epoch 28/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8880 - loss: 0.2952 - val_accuracy: 0.9214 - val_loss: 0.1824
Epoch 29/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8609 - loss: 0.3434 - val_accuracy: 0.8804 - val_loss: 0.3585
Epoch 30/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8917 - loss: 0.2821 - val_accuracy: 0.9321 - val_loss: 0.1830
Epoch 31/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8547 - loss: 0.3972 - val_accuracy: 0.9071 - val_loss: 0.2231
Epoch 32/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8871 - loss: 0.2710 - val_accuracy: 0.9143 - val_loss: 0.1978
Epoch 33/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8963 - loss: 0.2493 - val_accuracy: 0.8964 - val_loss: 0.2563
Epoch 34/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8906 - loss: 0.2941 - val_accuracy: 0.8929 - val_loss: 0.2758
Epoch 35/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9094 - loss: 0.2523 - val_accuracy: 0.9143 - val_loss: 0.2384
Epoch 36/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9024 - loss: 0.2402 - val_accuracy: 0.9321 - val_loss: 0.1963
Epoch 37/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8858 - loss: 0.3162 - val_accuracy: 0.9232 - val_loss: 0.2345
Epoch 38/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8966 - loss: 0.2782 - val_accuracy: 0.9339 - val_loss: 0.1830
Epoch 39/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8839 - loss: 0.2966 - val_accuracy: 0.8946 - val_loss: 0.2890
Epoch 40/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8622 - loss: 0.3863 - val_accuracy: 0.9018 - val_loss: 0.2189
Epoch 41/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8727 - loss: 0.3221 - val_accuracy: 0.9286 - val_loss: 0.1975
Epoch 42/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9000 - loss: 0.2577 - val_accuracy: 0.9232 - val_loss: 0.2666
Epoch 43/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8985 - loss: 0.2672 - val_accuracy: 0.9196 - val_loss: 0.2701
Epoch 44/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9024 - loss: 0.2611 - val_accuracy: 0.9196 - val_loss: 0.2020
Epoch 45/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8765 - loss: 0.2967 - val_accuracy: 0.9161 - val_loss: 0.2137
Epoch 46/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8774 - loss: 0.3223 - val_accuracy: 0.9161 - val_loss: 0.2147
Epoch 47/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8872 - loss: 0.3150 - val_accuracy: 0.9232 - val_loss: 0.2064
Epoch 48/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8722 - loss: 0.3439 - val_accuracy: 0.9304 - val_loss: 0.2162
Epoch 49/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9052 - loss: 0.2551 - val_accuracy: 0.9268 - val_loss: 0.2027
Epoch 50/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9110 - loss: 0.2641 - val_accuracy: 0.9143 - val_loss: 0.2146
Epoch 51/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9142 - loss: 0.2260 - val_accuracy: 0.9339 - val_loss: 0.1774
Epoch 52/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8972 - loss: 0.2703 - val_accuracy: 0.9107 - val_loss: 0.2315
Epoch 53/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8954 - loss: 0.2592 - val_accuracy: 0.9036 - val_loss: 0.2287
Epoch 54/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8858 - loss: 0.3092 - val_accuracy: 0.9018 - val_loss: 0.2453
Epoch 55/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9079 - loss: 0.3161 - val_accuracy: 0.9143 - val_loss: 0.2372
Epoch 56/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9048 - loss: 0.2634 - val_accuracy: 0.9321 - val_loss: 0.2082
Epoch 57/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9058 - loss: 0.2646 - val_accuracy: 0.9036 - val_loss: 0.2696
Epoch 58/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8975 - loss: 0.3085 - val_accuracy: 0.8875 - val_loss: 0.3023
Epoch 59/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8779 - loss: 0.2863 - val_accuracy: 0.9196 - val_loss: 0.2067
Epoch 60/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9160 - loss: 0.2508 - val_accuracy: 0.9429 - val_loss: 0.1715
Epoch 61/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9032 - loss: 0.2470 - val_accuracy: 0.9161 - val_loss: 0.2870
Epoch 62/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8914 - loss: 0.3180 - val_accuracy: 0.9196 - val_loss: 0.2180
Epoch 63/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9047 - loss: 0.2746 - val_accuracy: 0.9304 - val_loss: 0.1900
Epoch 64/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9217 - loss: 0.2127 - val_accuracy: 0.9179 - val_loss: 0.1993
Epoch 65/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8845 - loss: 0.3250 - val_accuracy: 0.9268 - val_loss: 0.2034
Epoch 66/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9128 - loss: 0.2499 - val_accuracy: 0.9125 - val_loss: 0.2211
Epoch 67/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9157 - loss: 0.2270 - val_accuracy: 0.9357 - val_loss: 0.2086
Epoch 68/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.8958 - loss: 0.2903 - val_accuracy: 0.8982 - val_loss: 0.3207
Epoch 69/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9080 - loss: 0.2654 - val_accuracy: 0.9286 - val_loss: 0.2377
Epoch 70/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8816 - loss: 0.3448 - val_accuracy: 0.9321 - val_loss: 0.2401
Epoch 71/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8985 - loss: 0.3153 - val_accuracy: 0.9321 - val_loss: 0.2243
Epoch 72/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9040 - loss: 0.2851 - val_accuracy: 0.9393 - val_loss: 0.2020
Epoch 73/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8986 - loss: 0.3026 - val_accuracy: 0.9089 - val_loss: 0.2165
Epoch 74/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8690 - loss: 0.3263 - val_accuracy: 0.9232 - val_loss: 0.2222
Epoch 75/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8907 - loss: 0.2996 - val_accuracy: 0.9321 - val_loss: 0.2092
Epoch 76/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8952 - loss: 0.3203 - val_accuracy: 0.9357 - val_loss: 0.1899
Epoch 77/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9014 - loss: 0.2707 - val_accuracy: 0.9161 - val_loss: 0.2202
Epoch 78/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8942 - loss: 0.2798 - val_accuracy: 0.9250 - val_loss: 0.1721
Epoch 79/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8917 - loss: 0.2861 - val_accuracy: 0.9125 - val_loss: 0.2829
Epoch 80/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8768 - loss: 0.3890 - val_accuracy: 0.9357 - val_loss: 0.1757
Epoch 81/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8946 - loss: 0.2578 - val_accuracy: 0.9304 - val_loss: 0.2035
Epoch 82/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9162 - loss: 0.2320 - val_accuracy: 0.8982 - val_loss: 0.2714
Epoch 83/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8991 - loss: 0.2398 - val_accuracy: 0.9464 - val_loss: 0.1740
Epoch 84/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8901 - loss: 0.2863 - val_accuracy: 0.9250 - val_loss: 0.1980
Epoch 85/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9018 - loss: 0.2530 - val_accuracy: 0.9357 - val_loss: 0.1475
Epoch 86/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9235 - loss: 0.1959 - val_accuracy: 0.8929 - val_loss: 0.2842
Epoch 87/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9130 - loss: 0.2804 - val_accuracy: 0.9375 - val_loss: 0.1678
Epoch 88/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9191 - loss: 0.2339 - val_accuracy: 0.9339 - val_loss: 0.1727
Epoch 89/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.8999 - loss: 0.2457 - val_accuracy: 0.9321 - val_loss: 0.1878
Epoch 90/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9221 - loss: 0.2113 - val_accuracy: 0.9339 - val_loss: 0.1972
Epoch 91/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9130 - loss: 0.2323 - val_accuracy: 0.9286 - val_loss: 0.1826
Epoch 92/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9226 - loss: 0.2340 - val_accuracy: 0.9214 - val_loss: 0.2221
Epoch 93/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9085 - loss: 0.2386 - val_accuracy: 0.9107 - val_loss: 0.2477
Epoch 94/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9038 - loss: 0.2300 - val_accuracy: 0.9321 - val_loss: 0.2024
Epoch 95/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9184 - loss: 0.2353 - val_accuracy: 0.9214 - val_loss: 0.2107
Epoch 96/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9247 - loss: 0.2124 - val_accuracy: 0.8768 - val_loss: 0.4173
Epoch 97/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9062 - loss: 0.2749 - val_accuracy: 0.9286 - val_loss: 0.1852
Epoch 98/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9110 - loss: 0.2202 - val_accuracy: 0.9054 - val_loss: 0.2485
Epoch 99/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9071 - loss: 0.2542 - val_accuracy: 0.9339 - val_loss: 0.1793
Epoch 100/100
70/70 ━━━━━━━━━━━━━━━━━━━━ 0s 1ms/step - accuracy: 0.9155 - loss: 0.2507 - val_accuracy: 0.9232 - val_loss: 0.2114
38/38 ━━━━━━━━━━━━━━━━━━━━ 0s 839us/step - accuracy: 0.9285 - loss: 0.2080
Test Accuracy: 0.9350000023841858
In [51]:
#Assuming y_true contains the true labels and y_score contains the predicted probabilities
#y_true should be a binary vector of true labels (0 or 1)
#y_score should be a vector of predicted probabilities for the positive class
#Calculate ROC AUC
y_pred_nn = best_model.predict(X_test)

roc_auc = roc_auc_score(y_test, y_pred_nn)

print("ROC AUC:", roc_auc)
38/38 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step 
ROC AUC: 0.9244938894460635
In [52]:
fpr, tpr, thresholds = roc_curve(y_test, y_pred_nn)

roc_auc = auc(fpr, tpr)

plt.figure(figsize=(8, 6))
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (AUC = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', label='Random')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc='lower right')
plt.show()
No description has been provided for this image
In [ ]: